home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
pcr
/
pcr4_4.lha
/
DIST
/
gc
/
GCallochblk.c
< prev
next >
Wrap
C/C++ Source or Header
|
1992-02-25
|
20KB
|
636 lines
/* begincopyright
Copyright (c) 1988,1990 Xerox Corporation. All rights reserved.
Use and copying of this software and preparation of derivative works based
upon this software are permitted. Any distribution of this software or
derivative works must comply with all applicable United States export
control laws. This software is made available AS IS, and Xerox Corporation
makes no warranty about the software, its performance or its conformity to
any specification. Any person obtaining a copy of this software is requested
to send their name and post office or electronic mail address to:
PCR Coordinator
Xerox PARC
3333 Coyote Hill Rd.
Palo Alto, CA 94304
Parts of this software were derived from code bearing the copyright notice:
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
This material may be freely distributed, provided this notice is retained.
This material is provided as is, with no warranty expressed or implied.
Use at your own risk.
endcopyright */
/*
* Boehm, October 11, 1991 10:31:22 am PDT
*/
#define DEBUG
#define VERBOSE
#undef VERBOSE
#include "xr/GCPrivate.h"
#include "xr/Threads.h"
#include "xr/ThreadsMsg.h"
#define I_HOLD_ML(ml) (((XR_ML)(ml))->ml_holder == XR_currThread)
#ifdef BLACK_LIST
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
/* largest block we will allocate starting on a black */
/* listed block. Must be >= HBLKSIZE. */
#endif
/*
 * Allocate/free routines for heap blocks.
 * Note that everything called from outside the garbage collector
 * should be prepared to abort at any point as the result of a signal.
 * GC_expand_hp is also here.
 */
/*
* Free heap blocks are kept on a list sorted by address.
* The hb_hdr.hbh_sz field of a free heap block contains the length
* (in bytes) of the entire block.
* Neighbors are coalesced.
*/
struct hblk *GC_savhbp = (struct hblk *)0; /* heap block preceding next */
/* block to be examined by */
/* allochblk. */
/* Protected by GC_allocate_ml */
int GC_small_blocks = 0; /* Number of free blocks of size 1 on the */
/* heap block free list. */
/* Also protected by GC_allocate_ml. */
/*
* Return TRUE if there is a heap block sufficient for object size sz,
* FALSE otherwise. Advance GC_savhbp to point to the block prior to the
* first such block.
* Should hold GC_allocate_ml, since GC_savhbp is updated.
*/
/*
 * Return TRUE if there is a heap block sufficient for object size sz
 * (in words; a negative sz denotes a pointer-free object, as elsewhere
 * in this collector), FALSE otherwise.  Advance GC_savhbp to point to
 * the block PRIOR to the first such block, so that a subsequent
 * GC_allochblk(sz) will find it immediately.
 * Should hold GC_allocate_ml, since GC_savhbp is updated.
 */
bool GC_sufficient_hb(sz)
int sz;
{
    register struct hblk *hbp;
#   ifdef BLACK_LIST
      register struct hblk *h;  /* cursor used only by the black-list scan */
#   endif
    struct hblk *prevhbp;
    int size_needed, size_avail;
    register char * GC_heapstart_reg = GC_heapstart;
#   define GC_heapstart GC_heapstart_reg
    int first_time = 1;

    if (!I_HOLD_ML(&GC_allocate_ml)) {
        GC_abort("GC_sufficient_hb 0");
    }
    /* Round the request up to a whole number of heap blocks, */
    /* including room for the block header.                   */
    size_needed = WORDS_TO_BYTES(abs(sz));
    size_needed = (size_needed+HDR_BYTES+HBLKSIZE-1) & ~HBLKMASK;
#   ifdef VERBOSE
        GC_vprintf("sufficient_hb: sz = %d, size_needed = 0x%X\n",
                   sz, size_needed);
#   endif
    /* search for a big enough block in free list, resuming at GC_savhbp */
    hbp = GC_savhbp;
    for(;;) {
        prevhbp = hbp;
        hbp = ((prevhbp == (struct hblk *)0)
                ? GC_hblkfreelist
                : hb_next(prevhbp));
        if( prevhbp == GC_savhbp && !first_time) {
            /* no sufficiently big blocks on free list */
            return(FALSE);
        }
        first_time = 0;
        if( hbp == (struct hblk *)0 ) continue;
        size_avail = hb_sz(hbp);
#       ifdef DEBUG
            if (GC_small_blocks == 0 && size_avail == HBLKSIZE) {
                GC_abort("GC_sufficient_hb: GC_small_blocks is wrong\n");
            }
#       endif /* DEBUG */  /* fixed: bare token after #endif is not standard C */
#       ifdef BLACK_LIST
            /* Small pointer-free requests may be satisfied even by a */
            /* black-listed block.                                    */
            if( size_avail >= size_needed && sz < 0
                && size_needed <= MAX_BLACK_LIST_ALLOC) {
                GC_savhbp = prevhbp;
                return(TRUE);
            }
            /* Otherwise discount any black-listed prefix of this block. */
            h = hbp;
            while (size_avail >= size_needed && is_black_listed(h)) {
                h++; size_avail -= HBLKSIZE;
            }
#       endif
        if( size_avail >= size_needed) {
            GC_savhbp = prevhbp;
            return(TRUE);
        }
    }
#   undef GC_heapstart  /* fixed: no trailing ';' on a preprocessor directive */
}
/*
* Allocate (and return pointer to) a heap block
* for objects of size |sz|.
*
* NOTE: Caller is responsible for adding it to global hblklist
* and for building an object freelist in it.
* We allocate the header, no matter where it resides.
*
* The new block is guaranteed to be cleared if sz > 0.
* Returns (struct hblk *)0 if the allocation request fails.
*/
struct hblk *
GC_allochblk(sz)
long sz;
{
register struct hblk *thishbp;
register struct hblk *hbp;
struct hblk *prevhbp;
long size_needed, /* number of bytes in requested objects */
uninit, /* => Found uninitialized block */
size_avail;
register char * GC_heapstart_reg = GC_heapstart;
# define GC_heapstart GC_heapstart_reg
int first_time = 1;
if (!I_HOLD_ML(&GC_allocate_ml)) {
GC_abort("GC_allochblk 0");
}
size_needed = WORDS_TO_BYTES(sz>0? sz : -sz);
size_needed = (size_needed+HDR_BYTES+HBLKSIZE-1) & ~HBLKMASK;
# ifdef VERBOSE
GC_vprintf(
"(allochblk) sz = %x, size_needed = 0x%X\n", sz, size_needed);
# endif
/* search for a big enough block in free list */
hbp = GC_savhbp;
for(;;) {
prevhbp = hbp;
hbp = ((prevhbp == (struct hblk *)0)
? GC_hblkfreelist
: hb_next(prevhbp));
if( prevhbp == GC_savhbp && !first_time) {
/* no sufficiently big blocks on free list, */
/* expand the heap and try again. */
int blocks_to_get =
divHBLKSZ(size_needed + HBLKSIZE
+ GC_composite_in_use/GC_free_mem_ratio);
if (blocks_to_get < MIN_HINCR) {
blocks_to_get = MIN_HINCR;
}
if (!GC_expand_hp(blocks_to_get, FALSE)) {
GC_iprintf(
"Out of memory - attempting emergency collection\n");
/* This the right thing? */
GC_demand_full_and_wait();
GC_reclaim_empty_pages();
if (GC_sufficient_hb(sz)) {
return(GC_allochblk(sz));
} else {
return((struct hblk *)0);
}
}
# ifdef PRINTSTATS
GC_printf("Increased heap size for size %d request\n",
sz);
# endif
return(GC_allochblk(sz));
}
first_time = 0;
if( hbp == (struct hblk *)0 ) continue;
size_avail = hb_sz(hbp);
# ifdef BLACK_LIST
if (sz > 0 || size_needed > MAX_BLACK_LIST_ALLOC) {
while(is_black_listed(hbp) && size_avail >= size_needed) {
size_avail -= HBLKSIZE;
if (size_avail >= size_needed) {
/* allocate the first block, and drop it. It'll be */
/* reconsidered after the next collection. */
thishbp = hbp;
hbp = thishbp + 1;
if (thishbp == GC_savhbp) {
/* Advance GC_savhbp so that termination test is */
/* still meaningful. */
GC_savhbp = hbp;
}
# ifdef SEPARATE_HEADERS
GC_get_header(hbp);
# endif
hb_uninit(hbp) = hb_uninit(thishbp);
hb_next(hbp) = hb_next(thishbp);
hb_sz(hbp) = size_avail;
if (size_avail == HBLKSIZE) {
GC_small_blocks++;
}
if( prevhbp == (struct hblk *)0 ) {
GC_hblkfreelist = hbp;
} else {
hb_next(prevhbp) = hbp;
}
hb_sz(thishbp) = -BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES);
{
register word *p = (word *)(&(hb_marks(thishbp)[0]));
register word * plim =
(word *)(&(hb_marks(thishbp)[MARK_BITS_SZ]));
while (p < plim) {
*p++ = 0;
}
}
hb_busy(thishbp) = 0;
hb_tenured(thishbp) = 0;
GC_add_hblklist(thishbp);
} /* else dont bother ... */
}
}
# endif /* BLACK_LIST */
if( size_avail >= size_needed) {
/* found a big enough block */
/* let thishbp --> the block */
/* set prevhbp, hbp to bracket it */
thishbp = hbp;
if( size_avail == size_needed ) {
if (size_needed == HBLKSIZE) {
GC_small_blocks--;
}
hbp = hb_next(hbp);
uninit = hb_uninit(thishbp);
} else if (size_needed == HBLKSIZE
# ifdef BLACK_LIST
&& sz < 0
# endif
&& GC_small_blocks >= 1) {
continue;
} else {
register unsigned long new_sz;
uninit = hb_uninit(thishbp);
hbp = (struct hblk *)
(((unsigned long)thishbp) + size_needed);
# ifdef SEPARATE_HEADERS
GC_get_header(hbp);
# endif
hb_uninit(hbp) = uninit;
hb_next(hbp) = hb_next(thishbp);
new_sz = size_avail - size_needed;
hb_sz(hbp) = new_sz;
if (new_sz == HBLKSIZE) {
GC_small_blocks++;
}
}
/* remove *thishbp from hblk freelist */
if( prevhbp == (struct hblk *)0 ) {
GC_hblkfreelist = hbp;
} else {
hb_next(prevhbp) = hbp;
}
/* save current list search position */
/* Don't look at a block we just split again, since */
/* that would result in huge chunks getting */
/* completely disassembled. */
GC_savhbp = hbp;
break;
}
}
/* set size field of *thishbp correctly */
hb_sz(thishbp) = sz;
/* Clear block if necessary */
if (uninit && sz > 0) {
register word * p = &(thishbp -> hb_body[0]);
register word * plim;
plim = (word *)(((char *)thishbp) + size_needed);
/* If p is not already 16 byte aligned, then this will */
/* cause it to point into the mark bits, which is safe. */
p = (word*)(((unsigned long)p) & ~(4 * sizeof(word) - 1));
while (p < plim) {
p[0] = 0;
p[1] = 0;
p[2] = 0;
p[3] = 0;
p += 4;
}
}
/* Clear mark bits, hb_busy, and hb_tenured */
{
register word *p = (word *)(&(hb_marks(thishbp)[0]));
register word * plim = (word *)(&(hb_marks(thishbp)[MARK_BITS_SZ]));
while (p < plim) {
*p++ = 0;
}
}
hb_busy(thishbp) = 0;
hb_tenured(thishbp) = 0;
# ifdef VERBOSE
GC_vprintf("Returning 0x%X\n", thishbp);
# endif
# ifdef VISUALIZE
displayAllocHblk(thishbp, BYTES_TO_WORDS(size_needed));
# endif
return( thishbp );
# undef GC_heapstart;
}
/* Clear the header information in a previously allocated heap block p */
/* so that it can be coalesced with an initialized heap block. */
/* Clear the header information in a previously allocated heap block p */
/* so that it can be coalesced with an initialized heap block. */
/* With SEPARATE_HEADERS the header does not live in the block body, */
/* so there is nothing to clear and this is a no-op. */
/* Called from GC_freehblk only when the coalesced block is fully */
/* initialized (hb_uninit clear), so the old header bytes must be zero. */
static GC_clear_header(p)
register struct hblk *p;
{
# ifndef SEPARATE_HEADERS
hb_sz(p) = 0;
hb_next(p) = (struct hblk *)0;
hb_sz_link(p) = (struct hblk *)0;
hb_uninit(p) = 0;
hb_tenured(p) = 0;
hb_busy(p) = 0;
hb_last_reclaimed(p) = 0;
# ifdef ALIGN_DOUBLE
hb_dummy(p) = 0;
# endif
# if MARK_BITS_SZ <= 56
/* Since this block was deallocated, only spurious mark */
/* bits corresponding to the header could conceivably be set */
hb_marks(p)[0] = 0;
hb_marks(p)[1] = 0;
# else
/* Deliberate compile-time trap: clearing two words is not enough */
/* for larger mark-bit arrays; update this code before raising */
/* MARK_BITS_SZ past 56. */
--> fix it
# endif
# endif
}
/*
* Free a heap block.
*
* Assume the block is not currently on hblklist.
*
* Coalesce the block with its neighbors if possible.
* All mark words (except possibly the first) are assumed to be cleared.
* The body is assumed to be cleared unless hb_uninit is nonzero.
*/
/*
 * Free a heap block.
 *
 * Assume the block is not currently on hblklist.
 *
 * Coalesce the block with its neighbors if possible.
 * All mark words (except possibly the first) are assumed to be cleared.
 * The body is assumed to be cleared unless hb_uninit is nonzero.
 */
void
GC_freehblk(p)
register struct hblk *p;
{
    register struct hblk *hbp, *prevhbp;
    register long size;
    register long prev_sz;
    register char * GC_heapstart_reg = GC_heapstart;
#   define GC_heapstart GC_heapstart_reg

    /* GC_savhbp may become invalid due to coalescing.  Clear it. */
    GC_savhbp = (struct hblk *)0;
    /* Convert the stored object size (words) to the full block */
    /* size in bytes, including the header.                     */
    size = HB_SIZE(p);
    size =
        ((WORDS_TO_BYTES(size)+HDR_BYTES+HBLKSIZE-1)
         & (~HBLKMASK));
#   ifdef VISUALIZE
        displayFreeHblk(p, BYTES_TO_WORDS(size));
#   endif
    /* Find the insertion point; the free list is sorted by address. */
    prevhbp = (struct hblk *) 0;
    hbp = GC_hblkfreelist;
    while( (hbp != (struct hblk *)0) && (hbp < p) ) {
        prevhbp = hbp;
        hbp = hb_next(hbp);
    }
    /* Sanity check: p must not overlap the following free block. */
    if (hbp != 0 && (char *)p + size > (char *)hbp) {
        XR_ConsoleMsg("Duplicate large block deallocation\n");
        XR_ConsoleMsg("%? arg = 0x%X -> 0x%X, 0x%X, 0x%X, ",
                      p, ((word *)p)[0], ((word *)p)[1], ((word *)p)[2]);
        XR_ConsoleMsg("0x%X, 0x%X, 0x%X\n",
                      ((word *)p)[3], ((word *)p)[4], ((word *)p)[5]);
        XR_ConsoleMsg("block size = %d (bytes ?)\n", hb_sz(p));
        XR_ConsoleMsg("%? old = 0x%X -> 0x%X, 0x%X, 0x%X, ",
                      hbp, ((word *)hbp)[0], ((word *)hbp)[1],
                      ((word *)hbp)[2]);
        XR_ConsoleMsg("0x%X, 0x%X, 0x%X\n",
                      ((word *)hbp)[3], ((word *)hbp)[4], ((word *)hbp)[5]);
        XR_ConsoleMsg("block size = %d (bytes ?)\n", hb_sz(hbp));
        XR_ConsoleMsg("Calling debugger - Thaw thread to continue\n");
        XR_CallDebugger("Duplicate large block deallocation\n");
        return;
    }
    /* Sanity check: p must not overlap the tail of the preceding */
    /* free block.                                                */
    if (prevhbp != 0 && (char *)prevhbp + hb_sz(prevhbp) > (char *)p) {
        XR_ConsoleMsg("Large block deallocation overlaps free block tail\n");
        XR_ConsoleMsg("%? arg = 0x%X -> 0x%X, 0x%X, 0x%X, ",
                      p, ((word *)p)[0], ((word *)p)[1], ((word *)p)[2]);
        XR_ConsoleMsg("0x%X, 0x%X, 0x%X\n",
                      ((word *)p)[3], ((word *)p)[4], ((word *)p)[5]);
        XR_ConsoleMsg("block size = %d (bytes ?)\n", hb_sz(p));
        XR_ConsoleMsg("%? old = 0x%X -> 0x%X, 0x%X, 0x%X, ",
                      prevhbp, ((word *)prevhbp)[0], ((word *)prevhbp)[1],
                      ((word *)prevhbp)[2]);
        XR_ConsoleMsg("0x%X, 0x%X, 0x%X\n",
                      ((word *)prevhbp)[3], ((word *)prevhbp)[4],
                      ((word *)prevhbp)[5]);
        XR_ConsoleMsg("block size = %d (bytes ?)\n", hb_sz(prevhbp));
        XR_ConsoleMsg("Calling debugger - Thaw thread to continue\n");
        /* Fixed: this previously reported "Duplicate large block   */
        /* deallocation", copy-pasted from the case above.          */
        XR_CallDebugger("Large block deallocation overlaps free block tail\n");
        return;
    }
    hb_sz(p) = size;
    if (size == HBLKSIZE) GC_small_blocks++;
    /* Coalesce with successor, if possible */
    if( (((unsigned)p)+size) == ((unsigned)hbp) ) {
        register long next_sz = hb_sz(hbp);
        hb_uninit(p) |= hb_uninit(hbp);
        hb_next(p) = hb_next(hbp);
        /* Both merged pieces stop being one-block fragments. */
        if (next_sz == HBLKSIZE) GC_small_blocks--;
        if (size == HBLKSIZE) GC_small_blocks--;
        size += next_sz;
        hb_sz(p) = size;
        if (!hb_uninit(p)) GC_clear_header(hbp);
    } else {
        hb_next(p) = hbp;
    }
    if( prevhbp == (struct hblk *)0 ) {
        GC_hblkfreelist = p;
    } else {
        prev_sz = hb_sz(prevhbp);
        if( (((unsigned)prevhbp) + prev_sz) == ((unsigned)p) ) {
            /* Coalesce with predecessor */
            (hb_uninit(prevhbp)) |= (hb_uninit(p));
            hb_next(prevhbp) = hb_next(p);
            if (prev_sz == HBLKSIZE) GC_small_blocks--;
            if (size == HBLKSIZE) GC_small_blocks--;
            hb_sz(prevhbp) = prev_sz + size;
            if (!hb_uninit(prevhbp)) GC_clear_header(p);
        } else {
            hb_next(prevhbp) = p;
        }
    }
#   undef GC_heapstart
}
/* Add a heap block to hblkmap. */
/* Add a heap block to hblkmap: mark its first page HBLK_VALID and */
/* record, for each following page, its (clamped) distance back to */
/* the start of the block.                                         */
void GC_add_hblklist(hbp)
struct hblk * hbp;
{
    long nwords = HB_SIZE(hbp);
    long base = divHBLKSZ(((long)hbp) - ((long)GC_heapstart));
    long nblocks;   /* block length in units of HBLKSIZE */
    long off;

    nblocks = (divHBLKSZ(WORDS_TO_BYTES(nwords)+HDR_BYTES+HBLKSIZE-1));
    hblkmap[base] = HBLK_VALID;
    for (off = 1; off < nblocks; off++) {
        /* Offsets that would overflow a char are clamped to the */
        /* largest representable value.                          */
        hblkmap[base+off] = (off < 0x7f) ? off : 0x7e;
    }
}
/* Delete a heap block from hblklist or hblkmap. */
/* Delete a heap block from hblkmap, marking every page it spans */
/* HBLK_INVALID.  A block already marked invalid indicates a     */
/* duplicate deallocation; that is reported and caught elsewhere. */
void GC_del_hblklist(hbp)
struct hblk * hbp;
{
    long nwords = HB_SIZE(hbp);
    long base = divHBLKSZ(((long)hbp) - ((long)GC_heapstart));
    long nblocks;   /* block length in units of HBLKSIZE */
    long off;

    nblocks = (divHBLKSZ(WORDS_TO_BYTES(nwords)+HDR_BYTES+HBLKSIZE-1));
    if (hblkmap[base] == HBLK_INVALID) {
        /* This implies a duplicate deallocation. We catch it elsewhere. */
        XR_ConsoleMsg("%? GC_del_hblklist: redundant call\n");
        return;
    }
    for (off = 0; off < nblocks; off++) {
        hblkmap[base+off] = HBLK_INVALID;
    }
}
/* Type of a client callback invoked by GC_expand_hp when a requested */
/* expansion would exceed GC_max_heap_size (see GC_expand_hp below).  */
typedef void (*GC_expand_call_back_type)(/*XR_Pointer client_data*/);
static GC_expand_call_back_type expand_call_back = NIL;
static XR_Pointer expand_call_back_data = NIL;
/* Register fn/data as the heap-expansion callback, replacing any     */
/* previous registration.  The previous callback and its data are     */
/* returned through *Ofn and *Odata when those pointers are non-null. */
/* Serialized under GC_allocate_ml.                                   */
void GC_register_expand_callback(fn, data, Ofn, Odata)
GC_expand_call_back_type fn;
XR_Pointer data;
GC_expand_call_back_type *Ofn;
XR_Pointer *Odata;
{
XR_MonitorEntry(&GC_allocate_ml);
if (Ofn != 0) {
*Ofn = expand_call_back;
}
if (Odata != 0) {
*Odata = expand_call_back_data;
}
expand_call_back = fn;
expand_call_back_data = data;
XR_MonitorExit(&GC_allocate_ml);
}
/* Upper bound (bytes) on the total heap size, checked by GC_expand_hp. */
/* GC_expand_hp re-checks after invoking the expand callback, so the    */
/* callback presumably may raise this limit -- confirm against clients. */
unsigned long GC_max_heap_size = HEAPLIM - HEAPSTART;
/*
* this explicitly increases the size of the heap. It is used
* internally, but may also be invoked directly by the user.
* The argument is in units of HBLKSIZE.
* The second argument is TRUE iff this is being called before the threads
* world has been started.
* Returns TRUE on success, FALSE on failure.
* Assumes we hold GC_allocate_ml.
*/
/*
 * this explicitly increases the size of the heap. It is used
 * internally, but may also be invoked directly by the user.
 * The argument is in units of HBLKSIZE.
 * The second argument is TRUE iff this is being called before the threads
 * world has been started.
 * Returns TRUE on success, FALSE on failure.
 * Assumes we hold GC_allocate_ml (checked, unless initial).
 */
bool GC_expand_hp(n,initial)
unsigned n;
bool initial;   /* fixed: was undeclared, silently defaulting to int */
{
    struct hblk * GC_get_sys_mem();
    struct hblk * thishbp;
    char * block_end;
#   ifdef SEPARATE_HEADERS
      long hdr_blks = divHBLKSZ(n * (sizeof (struct hblkhdr)) + HBLKSIZE - 1);
      long size_to_get = (n + hdr_blks) * HBLKSIZE;
#   else
      long size_to_get = n * HBLKSIZE;
#   endif
    long final_size = GC_heapsize + n * HBLKSIZE;

    if (final_size > GC_max_heap_size) {
        /* Give the client callback a chance; the limit is re-checked */
        /* afterwards.                                                */
        if (expand_call_back != NIL) {
            (*expand_call_back)(expand_call_back_data);
        }
        if (final_size > GC_max_heap_size) {
            return(FALSE);
        }
    }
    if (initial) {
        thishbp = GC_get_sys_mem_initial(size_to_get);
    } else {
        if (! I_HOLD_ML(&GC_allocate_ml)) {
            XR_ConsoleMsg("%? GC_expand_hp called without GC_allocate_ml\n");
            /* Might want to panic here ... */
        }
        thishbp = GC_get_sys_mem(size_to_get);
    }
    if( thishbp == (struct hblk *)0 ) {
        return(FALSE);
    }
    block_end = (char *) (((unsigned)thishbp) + n * HBLKSIZE);
    if (block_end > GC_heaplim) {
        GC_heaplim = block_end;
        if (((struct hblk *)GC_heaplim)
            - (struct hblk *)GC_heapstart > MAP_SIZE) {
            GC_abort("Exceeded maximum heap size - reconfigure GC!");
        }
    }
#   ifdef PRINTSTATS
        /* NOTE(review): XR_LogVMsg appears to be a macro supplying the */
        /* opening parenthesis -- confirm before "fixing" this line.    */
        XR_LogVMsg "Increasing heap size by %d\n", n*HBLKSIZE);
#   endif
#   ifdef SEPARATE_HEADERS
        GC_use_as_headers(block_end, hdr_blks);
        GC_get_header(thishbp);
#   endif
    /* Put the new memory on the free list via the normal free path, */
    /* which also handles coalescing with an adjacent free block.    */
    hb_sz(thishbp) = BYTES_TO_WORDS(n * HBLKSIZE - HDR_BYTES);
    GC_freehblk(thishbp);
    GC_heapsize = final_size;
    if (!initial && (GC_mode & DIRTY_BITS_REQUIRED)) {
        /* This is always safe, in that this is untouched memory */
        GC_clear_some_dirty_bits(thishbp, GC_heaplim);
    }
    return(TRUE);
}
/*
* Returns TRUE if at least N GC pages (i.e. hblocks) are free (no
* objects at all on them) and 0 otherwise.
* (Written this way, rather than just to return the number of free
* pages, to keep working set down by truncating search.)
*/
/*
 * Returns TRUE if at least N GC pages (i.e. hblocks) are free (no
 * objects at all on them) and 0 otherwise.
 * (Written this way, rather than just to return the number of free
 * pages, to keep working set down by truncating search.)
 */
bool
XR_NfreePagesP(N)
unsigned N;
{
    register struct hblk *fp;
    register int seen = 0;  /* free pages counted so far */

    for (fp = GC_hblkfreelist;
         fp != (struct hblk *)0 && seen < N;
         fp = hb_next(fp)) {
        seen += ((WORDS_TO_BYTES(hb_sz(fp))+HBLKSIZE-1)/HBLKSIZE);
    }
    return (seen >= N);
}